ORIG_EAX = 0x24
EIP = 0x28
CS = 0x2C
+EVENT_MASK = 0x2E
EFLAGS = 0x30
OLDESP = 0x34
OLDSS = 0x38
#define XEN_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
#define XEN_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
-#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(%reg)
+#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
#ifdef CONFIG_PREEMPT
-#define preempt_stop movl HYPERVISOR_shared_info,%esi ; \
- XEN_BLOCK_EVENTS(%esi)
+#define preempt_stop XEN_BLOCK_EVENTS(%esi)
#else
#define preempt_stop
#define resume_kernel restore_all
#endif
-#define SAVE_ALL \
+#define SAVE_ALL_NO_EVENTMASK \
cld; \
pushl %es; \
pushl %ds; \
pushl %ebx; \
movl $(__USER_DS), %edx; \
movl %edx, %ds; \
- movl %edx, %es;
+ movl %edx, %es
+
+#define SAVE_ALL \
+ SAVE_ALL_NO_EVENTMASK; \
+ movl HYPERVISOR_shared_info, %esi; \
+ movb evtchn_upcall_mask(%esi), %dl; \
+ movb %dl, EVENT_MASK(%esp)
#define RESTORE_INT_REGS \
popl %ebx; \
testl $(VM_MASK | 2), %eax
jz resume_kernel # returning to kernel or vm86-space
ENTRY(resume_userspace)
- movl HYPERVISOR_shared_info,%esi
- XEN_BLOCK_EVENTS(%esi) # make tests atomic
- # make sure we don't miss an interrupt
+ XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
-ret_syscall_tests:
movl TI_flags(%ebp), %ecx
andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
# int/exception return?
jne work_pending
- jmp restore_all_enable_events
+ jmp restore_all
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
- movl HYPERVISOR_shared_info,%esi
cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
- jnz restore_all_enable_events
+ jnz restore_all
need_resched:
movl TI_flags(%ebp), %ecx # need_resched set ?
testb $_TIF_NEED_RESCHED, %cl
- jz restore_all_enable_events
+ jz restore_all
testl $IF_MASK,EFLAGS(%esp) # interrupts off (exception path) ?
- jz restore_all_enable_events
+ jz restore_all
movl $PREEMPT_ACTIVE,TI_preempt_count(%ebp)
- XEN_UNBLOCK_EVENTS(%esi) # reenable event callbacks
+ XEN_UNBLOCK_EVENTS(%esi)
call schedule
movl $0,TI_preempt_count(%ebp)
- movl HYPERVISOR_shared_info,%esi
- XEN_BLOCK_EVENTS(%esi) # make tests atomic
+ XEN_BLOCK_EVENTS(%esi)
jmp need_resched
#endif
pushl %eax
SAVE_ALL
GET_THREAD_INFO(%ebp)
- cmpl $(nr_syscalls), %eax
- jae syscall_badsys
testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
jnz syscall_trace_entry
+ cmpl $(nr_syscalls), %eax
+ jae syscall_badsys
call *sys_call_table(,%eax,4)
movl %eax,EAX(%esp)
cli
pushl %eax # save orig_eax
SAVE_ALL
GET_THREAD_INFO(%ebp)
- cmpl $(nr_syscalls), %eax
- jae syscall_badsys
# system call tracing in operation
testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
jnz syscall_trace_entry
+ cmpl $(nr_syscalls), %eax
+ jae syscall_badsys
syscall_call:
call *sys_call_table(,%eax,4)
movl %eax,EAX(%esp) # store the return value
syscall_exit:
- movl HYPERVISOR_shared_info,%esi
- XEN_BLOCK_EVENTS(%esi) # make tests atomic
- # make sure we don't miss an interrupt
+ XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
movl TI_flags(%ebp), %ecx
testw $_TIF_ALLWORK_MASK, %cx # current->work
jne syscall_exit_work
- jmp restore_all_enable_events
-
- ALIGN
restore_all:
+ movb EVENT_MASK(%esp), %al
+ notb %al # %al == ~saved_mask
+ andb evtchn_upcall_mask(%esi),%al
+	andb $1,%al			# %al == (mask & ~saved_mask) & 1
+ jnz restore_all_enable_events # != 0 => reenable event delivery
RESTORE_ALL
# perform work that needs to be done immediately before resumption
ALIGN
work_pending:
- XEN_UNBLOCK_EVENTS(%esi) # reenable event callbacks
testb $_TIF_NEED_RESCHED, %cl
jz work_notifysig
work_resched:
call schedule
- movl HYPERVISOR_shared_info,%esi
- XEN_BLOCK_EVENTS(%esi) # make tests atomic
- # make sure we don't miss an interrupt
+ XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
movl TI_flags(%ebp), %ecx
andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
# than syscall tracing?
- jz restore_all_enable_events
- # XXXcl sti missing???
- XEN_UNBLOCK_EVENTS(%esi) # reenable event callbacks
+ jz restore_all
testb $_TIF_NEED_RESCHED, %cl
jnz work_resched
# vm86-space
xorl %edx, %edx
call do_notify_resume
- movl HYPERVISOR_shared_info,%esi
- jmp restore_all_enable_events
+ jmp restore_all
ALIGN
work_notifysig_v86:
movl %eax, %esp
xorl %edx, %edx
call do_notify_resume
- movl HYPERVISOR_shared_info,%esi
- jmp restore_all_enable_events
+ jmp restore_all
# perform syscall exit tracing
ALIGN
# perform syscall exit tracing
ALIGN
syscall_exit_work:
- movl HYPERVISOR_shared_info,%esi
- testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT), %cl
+ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
jz work_pending
- XEN_UNBLOCK_EVENTS(%esi) # reenable event callbacks
- # could let do_syscall_trace() call
+ XEN_UNBLOCK_EVENTS(%esi) # could let do_syscall_trace() call
# schedule() instead
movl %esp, %eax
movl $1, %edx
movl $-ENOSYS,EAX(%esp)
jmp resume_userspace
+#if 0 /* XEN */
+/*
+ * Build the entry stubs and pointer table with
+ * some assembler magic.
+ */
+.data
+ENTRY(interrupt)
+.text
+
+vector=0
+ENTRY(irq_entries_start)
+.rept NR_IRQS
+ ALIGN
+1: pushl $vector-256
+ jmp common_interrupt
+.data
+ .long 1b
+.text
+vector=vector+1
+.endr
+
+ ALIGN
+common_interrupt:
+ SAVE_ALL
+ call do_IRQ
+ jmp ret_from_intr
+
+#define BUILD_INTERRUPT(name, nr) \
+ENTRY(name) \
+ pushl $nr-256; \
+ SAVE_ALL \
+ call smp_/**/name; \
+ jmp ret_from_intr;
+
+/* The include is where all of the SMP etc. interrupts come from */
+#include "entry_arch.h"
+#endif /* XEN */
+
ENTRY(divide_error)
pushl $0 # no error code
pushl $do_divide_error
movl %esp, %edx
pushl %esi # push the error code
pushl %edx # push the pt_regs pointer
- movl $(__KERNEL_DS), %edx # XXXcl USER?
+ movl $(__USER_DS), %edx
movl %edx, %ds
movl %edx, %es
+ movl HYPERVISOR_shared_info, %esi
+ movb evtchn_upcall_mask(%esi), %dl
+ movb %dl, EVENT_MASK+8(%esp)
call *%edi
addl $8, %esp
jmp ret_from_exception
# activation and restart the handler using the previous one.
ENTRY(hypervisor_callback)
pushl %eax
- SAVE_ALL
- GET_THREAD_INFO(%ebp)
+ SAVE_ALL_NO_EVENTMASK
movl EIP(%esp),%eax
cmpl $scrit,%eax
jb 11f
cmpl $ecrit,%eax
jb critical_region_fixup
-11: push %esp
+11: movl HYPERVISOR_shared_info, %esi
+ movb $0, EVENT_MASK(%esp)
+ push %esp
call evtchn_do_upcall
add $4,%esp
- movl HYPERVISOR_shared_info,%esi
- movb CS(%esp),%cl
- test $2,%cl # slow return to ring 2 or 3
- jne ret_syscall_tests
+ jmp ret_from_intr
+
+ ALIGN
restore_all_enable_events:
-safesti:XEN_UNBLOCK_EVENTS(%esi) # reenable event callbacks
+ XEN_UNBLOCK_EVENTS(%esi)
scrit: /**** START OF CRITICAL REGION ****/
- testb $1,evtchn_upcall_pending(%esi)
+ XEN_TEST_PENDING(%esi)
jnz 14f # process more events if necessary...
RESTORE_ALL
14: XEN_BLOCK_EVENTS(%esi)
jne debug_stack_correct
FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
- pushl $0
- pushl $do_debug
- jmp error_code
+ pushl $-1 # mark this as an int
+ SAVE_ALL
+ movl %esp,%edx
+ pushl $0
+ pushl %edx
+ call do_debug
+ addl $8,%esp
+ testl %eax,%eax
+ jnz restore_all
+ jmp ret_from_exception
-#if 0
+#if 0 /* XEN */
/*
* NMI is doubly nasty. It can happen _while_ we're handling
* a debug fault, and the debug fault hasn't yet been able to
nmi_debug_stack_fixup:
FIX_STACK(24,nmi_stack_correct, 1)
jmp nmi_stack_correct
-#endif
+#endif /* XEN */
ENTRY(int3)
+ pushl $-1 # mark this as an int
+ SAVE_ALL
+ movl %esp,%edx
pushl $0
- pushl $do_int3
- jmp error_code
+ pushl %edx
+ call do_int3
+ addl $8,%esp
+ testl %eax,%eax
+ jnz restore_all
+ jmp ret_from_exception
ENTRY(overflow)
pushl $0
pushl $do_coprocessor_segment_overrun
jmp error_code
-ENTRY(double_fault)
- pushl $do_double_fault
- jmp error_code
-
ENTRY(invalid_TSS)
pushl $do_invalid_TSS
jmp error_code
# This handler is special, because it gets an extra value on its stack,
# which is the linear faulting address.
-#define PAGE_FAULT_STUB(_name1, _name2) \
-ENTRY(_name1) \
- pushl %ds ; \
- pushl %eax ; \
- xorl %eax,%eax ; \
- pushl %ebp ; \
- pushl %edi ; \
- pushl %esi ; \
- pushl %edx ; \
- decl %eax /* eax = -1 */ ; \
- pushl %ecx ; \
- pushl %ebx ; \
- GET_THREAD_INFO(%ebp) ; \
- cld ; \
- movl %es,%ecx ; \
- movl ORIG_EAX(%esp), %esi /* get the error code */ ; \
- movl ES(%esp), %edi /* get the faulting address */ ; \
- movl %eax, ORIG_EAX(%esp) ; \
- movl %ecx, ES(%esp) ; \
- movl %esp,%edx ; \
- pushl %edi /* push the faulting address */ ; \
- pushl %esi /* push the error code */ ; \
- pushl %edx /* push the pt_regs pointer */ ; \
- movl $(__KERNEL_DS),%edx ; \
- movl %edx,%ds ; \
- movl %edx,%es ; \
- call _name2 ; \
- addl $12,%esp ; \
- jmp ret_from_exception ;
-PAGE_FAULT_STUB(page_fault, do_page_fault)
+ENTRY(page_fault)
+ pushl %ds
+ pushl %eax
+ xorl %eax,%eax
+ pushl %ebp
+ pushl %edi
+ pushl %esi
+ pushl %edx
+ decl %eax /* eax = -1 */
+ pushl %ecx
+ pushl %ebx
+ GET_THREAD_INFO(%ebp)
+ cld
+ movl %es,%ecx
+ movl ORIG_EAX(%esp), %esi /* get the error code */
+ movl ES(%esp), %edi /* get the faulting address */
+ movl %eax, ORIG_EAX(%esp)
+ movl %ecx, ES(%esp)
+ movl %esp,%edx
+ pushl %edi /* push the faulting address */
+ pushl %esi /* push the error code */
+ pushl %edx /* push the pt_regs pointer */
+ movl $(__KERNEL_DS),%edx
+ movl %edx,%ds
+ movl %edx,%es
+ movl HYPERVISOR_shared_info, %esi
+ movb evtchn_upcall_mask(%esi), %dl
+ movb %dl, EVENT_MASK+12(%esp)
+ call do_page_fault
+ addl $12,%esp
+ jmp ret_from_exception
#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
.long sys_mq_notify
.long sys_mq_getsetattr
.long sys_ni_syscall /* reserved for kexec */
+ .long sys_waitid
syscall_table_size=(.-sys_call_table)
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
-asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available)
-DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
{ 5, 3, __KERNEL_CS, (unsigned long)bounds },
{ 6, 0, __KERNEL_CS, (unsigned long)invalid_op },
{ 7, 0, __KERNEL_CS, (unsigned long)device_not_available },
- { 8, 0, __KERNEL_CS, (unsigned long)double_fault },
{ 9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
{ 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS },
{ 11, 0, __KERNEL_CS, (unsigned long)segment_not_present },